Object Detection via Semantic Segmentation

Import Modules

In [1]:
# IMPORT MODULES
# Import Import Numpy, TensorFlow, Scipy, Keras
import sys
import time
import os
import numpy as np 
import pandas as pd 
from glob import glob
import cv2
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
import pickle
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.signal import find_peaks_cwt
import random
from sklearn.utils import shuffle
import csv
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from tensorflow.examples.tutorials.mnist import input_data
import keras
from keras.datasets import cifar10
from keras.models import Sequential,model_from_json
from keras.layers import Dense,Dropout,Activation,Flatten
from keras.layers import Convolution2D,MaxPooling2D
from keras.layers import Flatten,Lambda,ELU
from keras.optimizers import SGD,Adam,RMSprop
from keras.layers.convolutional import Conv2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.initializers import glorot_uniform
from sklearn.cross_validation import train_test_split
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
from IPython.display import SVG
import json
from keras.models import Sequential, model_from_json
print('Import Modules')
Import Modules
Using TensorFlow backend.

Load Data

In [2]:
# LOAD DATA
# Change Directory
os.chdir('..')
In [3]:
# LOAD DATA
# Find Files
all_paths = pd.DataFrame(dict(Path = glob(os.path.join('Data','*', '*.*p*g'))))
all_paths['Split'] = all_paths['Path'].map(lambda x: x.split('/')[-2].split('-')[0])
all_paths['Group'] = all_paths['Path'].map(lambda x: x.split('/')[-2].split('-')[-1])
all_paths['ID'] = all_paths['Path'].map(lambda x: '_'.join(os.path.splitext(os.path.basename(x))[0].split('_')[:4]))
In [4]:
# LOAD DATA
# Display Samples
all_paths.sample(5)
Out[4]:
Path Split Group ID
54504 Data/Train-Label/170908_072820973_Camera_6_ins... Train Label 170908_072820973_Camera_6
57957 Data/Train-Label/170927_070435552_Camera_6_ins... Train Label 170927_070435552_Camera_6
13585 Data/Train-Color/170908_075340591_Camera_6.jpg Train Color 170908_075340591_Camera_6
58010 Data/Train-Label/170908_075423203_Camera_5_ins... Train Label 170908_075423203_Camera_5
51822 Data/Train-Label/170908_065012481_Camera_5_ins... Train Label 170908_065012481_Camera_5
In [5]:
# LOAD DATA
# Pivot Data
group_df = all_paths.pivot_table(values = 'Path', columns = 'Group', \
                                 aggfunc = 'first', index = ['ID', 'Split']).reset_index()
In [6]:
# LOAD DATA
# Display Samples
group_df.sample(5)
Out[6]:
Group ID Split Color Label
26545 171206_025859115_Camera_5 Train Data/Train-Color/171206_025859115_Camera_5.jpg Data/Train-Label/171206_025859115_Camera_5_ins...
32877 171206_033000126_Camera_5 Train Data/Train-Color/171206_033000126_Camera_5.jpg Data/Train-Label/171206_033000126_Camera_5_ins...
38570 171206_034419463_Camera_5 Train Data/Train-Color/171206_034419463_Camera_5.jpg Data/Train-Label/171206_034419463_Camera_5_ins...
25461 170927_070423029_Camera_6 Train Data/Train-Color/170927_070423029_Camera_6.jpg Data/Train-Label/170927_070423029_Camera_6_ins...
35245 171206_033609954_Camera_6 Train Data/Train-Color/171206_033609954_Camera_6.jpg Data/Train-Label/171206_033609954_Camera_6_ins...
In [7]:
# LOAD DATA
# Load Training Data Files
training_data_files = group_df.query('Split=="Train"')
data_size = training_data_files.shape[0]
print('Size of Training Data', data_size)
Size of Training Data 39222

Explore Data

In [8]:
# EXPLORE DATA
# Load Images in Loop and Display
for i in range(1020,1030):
    # Load Images
    input_img = cv2.imread(training_data_files['Color'][i])
    label_img = cv2.imread(training_data_files['Label'][i])
    
    # Crop
    input_img = input_img[1200:2400,:]
    label_img = label_img[1200:2400,:]
    
    # Convert to Grayscale
    label_img = cv2.cvtColor(label_img, cv2.COLOR_RGB2GRAY)
    
    # Resize Image to 100x100
    input_img = cv2.resize(input_img, (560, 200))
    label_img = cv2.resize(label_img, (560, 200))
    
    # Plot
    fig,(axis1,axis2)=plt.subplots(1,2,figsize=(15,15))
    axis1.imshow(input_img)
    axis1.set_title('Image',fontsize=7.5)
    axis2.imshow(label_img)
    axis2.set_title('Label',fontsize=7.5)

Preprocess Data

In [9]:
# PREPROCESS DATA
# Clear Memory
del all_paths
del group_df
In [10]:
# PREPROCESS DATA
# Load Input Images
x_train = []
for i in range(10000):
    # Load Images
    input_img = cv2.imread(training_data_files['Color'][i])
    
    # Crop
    input_img = input_img[1200:2400,:]
    
    # Resize Image to 100x280
    input_img = cv2.resize(input_img, (288, 288))
    
    # Flatten Image and Append 
    image_flat = input_img.flatten().tolist()
    x_train += image_flat
    
    # Check Progress
    if (i%100 == 0):
        print('Images Loaded:', i)

# Check Size of Data
image_feature_size=int(len(x_train))
print("Feature Size:", image_feature_size)

# Revert to the Original Image Shapes
x_train = np.array(x_train).reshape(10000, 288, 288, 3)
print("Feature Size:", x_train.shape)
Images Loaded: 0
Images Loaded: 100
Images Loaded: 200
Images Loaded: 300
Images Loaded: 400
Images Loaded: 500
Images Loaded: 600
Images Loaded: 700
Images Loaded: 800
Images Loaded: 900
Images Loaded: 1000
Images Loaded: 1100
Images Loaded: 1200
Images Loaded: 1300
Images Loaded: 1400
Images Loaded: 1500
Images Loaded: 1600
Images Loaded: 1700
Images Loaded: 1800
Images Loaded: 1900
Images Loaded: 2000
Images Loaded: 2100
Images Loaded: 2200
Images Loaded: 2300
Images Loaded: 2400
Images Loaded: 2500
Images Loaded: 2600
Images Loaded: 2700
Images Loaded: 2800
Images Loaded: 2900
Images Loaded: 3000
Images Loaded: 3100
Images Loaded: 3200
Images Loaded: 3300
Images Loaded: 3400
Images Loaded: 3500
Images Loaded: 3600
Images Loaded: 3700
Images Loaded: 3800
Images Loaded: 3900
Images Loaded: 4000
Images Loaded: 4100
Images Loaded: 4200
Images Loaded: 4300
Images Loaded: 4400
Images Loaded: 4500
Images Loaded: 4600
Images Loaded: 4700
Images Loaded: 4800
Images Loaded: 4900
Images Loaded: 5000
Images Loaded: 5100
Images Loaded: 5200
Images Loaded: 5300
Images Loaded: 5400
Images Loaded: 5500
Images Loaded: 5600
Images Loaded: 5700
Images Loaded: 5800
Images Loaded: 5900
Images Loaded: 6000
Images Loaded: 6100
Images Loaded: 6200
Images Loaded: 6300
Images Loaded: 6400
Images Loaded: 6500
Images Loaded: 6600
Images Loaded: 6700
Images Loaded: 6800
Images Loaded: 6900
Images Loaded: 7000
Images Loaded: 7100
Images Loaded: 7200
Images Loaded: 7300
Images Loaded: 7400
Images Loaded: 7500
Images Loaded: 7600
Images Loaded: 7700
Images Loaded: 7800
Images Loaded: 7900
Images Loaded: 8000
Images Loaded: 8100
Images Loaded: 8200
Images Loaded: 8300
Images Loaded: 8400
Images Loaded: 8500
Images Loaded: 8600
Images Loaded: 8700
Images Loaded: 8800
Images Loaded: 8900
Images Loaded: 9000
Images Loaded: 9100
Images Loaded: 9200
Images Loaded: 9300
Images Loaded: 9400
Images Loaded: 9500
Images Loaded: 9600
Images Loaded: 9700
Images Loaded: 9800
Images Loaded: 9900
Feature Size: 2488320000
Feature Size: (10000, 288, 288, 3)
In [11]:
# PREPROCESS DATA
# Load Label Images
y_train = []
for i in range(10000):
    # Load Images
    label_img = cv2.imread(training_data_files['Label'][i])
    
    # Crop
    label_img = label_img[1200:2400,:]
    
    # Convert to Grayscale
    label_img = cv2.cvtColor(label_img, cv2.COLOR_RGB2GRAY)
    
    # Resize Image to 100x280
    label_img = cv2.resize(label_img, (288, 288))
    
    # Flatten Image and Append 
    label_flat = label_img.flatten().tolist()
    y_train += label_flat
    
    # Check Progress
    if (i%100 == 0):
        print('Images Loaded:', i)

# Check Size of Data
label_feature_size=int(len(y_train))
print("Feature Size:", label_feature_size)

# Revert to the Original Image Shapes
y_train = np.array(y_train).reshape(10000, 288, 288, 1)
print("Feature Size:", y_train.shape)
Images Loaded: 0
Images Loaded: 100
Images Loaded: 200
Images Loaded: 300
Images Loaded: 400
Images Loaded: 500
Images Loaded: 600
Images Loaded: 700
Images Loaded: 800
Images Loaded: 900
Images Loaded: 1000
Images Loaded: 1100
Images Loaded: 1200
Images Loaded: 1300
Images Loaded: 1400
Images Loaded: 1500
Images Loaded: 1600
Images Loaded: 1700
Images Loaded: 1800
Images Loaded: 1900
Images Loaded: 2000
Images Loaded: 2100
Images Loaded: 2200
Images Loaded: 2300
Images Loaded: 2400
Images Loaded: 2500
Images Loaded: 2600
Images Loaded: 2700
Images Loaded: 2800
Images Loaded: 2900
Images Loaded: 3000
Images Loaded: 3100
Images Loaded: 3200
Images Loaded: 3300
Images Loaded: 3400
Images Loaded: 3500
Images Loaded: 3600
Images Loaded: 3700
Images Loaded: 3800
Images Loaded: 3900
Images Loaded: 4000
Images Loaded: 4100
Images Loaded: 4200
Images Loaded: 4300
Images Loaded: 4400
Images Loaded: 4500
Images Loaded: 4600
Images Loaded: 4700
Images Loaded: 4800
Images Loaded: 4900
Images Loaded: 5000
Images Loaded: 5100
Images Loaded: 5200
Images Loaded: 5300
Images Loaded: 5400
Images Loaded: 5500
Images Loaded: 5600
Images Loaded: 5700
Images Loaded: 5800
Images Loaded: 5900
Images Loaded: 6000
Images Loaded: 6100
Images Loaded: 6200
Images Loaded: 6300
Images Loaded: 6400
Images Loaded: 6500
Images Loaded: 6600
Images Loaded: 6700
Images Loaded: 6800
Images Loaded: 6900
Images Loaded: 7000
Images Loaded: 7100
Images Loaded: 7200
Images Loaded: 7300
Images Loaded: 7400
Images Loaded: 7500
Images Loaded: 7600
Images Loaded: 7700
Images Loaded: 7800
Images Loaded: 7900
Images Loaded: 8000
Images Loaded: 8100
Images Loaded: 8200
Images Loaded: 8300
Images Loaded: 8400
Images Loaded: 8500
Images Loaded: 8600
Images Loaded: 8700
Images Loaded: 8800
Images Loaded: 8900
Images Loaded: 9000
Images Loaded: 9100
Images Loaded: 9200
Images Loaded: 9300
Images Loaded: 9400
Images Loaded: 9500
Images Loaded: 9600
Images Loaded: 9700
Images Loaded: 9800
Images Loaded: 9900
Feature Size: 829440000
Feature Size: (10000, 288, 288, 1)
In [12]:
# PREPROCESS DATA
# Load Input Images
x_valid = []
for i in range(2000):
    # Load Images
    input_img = cv2.imread(training_data_files['Color'][i])
    
    # Crop
    input_img = input_img[1200:2400,:]
    
    # Resize Image to 100x280
    input_img = cv2.resize(input_img, (288, 288))
    
    # Flatten Image and Append 
    image_flat = input_img.flatten().tolist()
    x_valid += image_flat
    
    # Check Progress
    if (i%100 == 0):
        print('Images Loaded:', i)

# Check Size of Data
image_feature_size=int(len(x_valid))
print("Feature Size:", image_feature_size)

# Revert to the Original Image Shapes
x_valid = np.array(x_valid).reshape(2000, 288, 288, 3)
print("Feature Size:", x_valid.shape)
Images Loaded: 0
Images Loaded: 100
Images Loaded: 200
Images Loaded: 300
Images Loaded: 400
Images Loaded: 500
Images Loaded: 600
Images Loaded: 700
Images Loaded: 800
Images Loaded: 900
Images Loaded: 1000
Images Loaded: 1100
Images Loaded: 1200
Images Loaded: 1300
Images Loaded: 1400
Images Loaded: 1500
Images Loaded: 1600
Images Loaded: 1700
Images Loaded: 1800
Images Loaded: 1900
Feature Size: 497664000
Feature Size: (2000, 288, 288, 3)
In [13]:
# PREPROCESS DATA
# Load Label Images
y_valid = []
for i in range(2000):
    # Load Images
    label_img = cv2.imread(training_data_files['Label'][i])
    
    # Crop
    label_img = label_img[1200:2400,:]
    
    # Convert to Grayscale
    label_img = cv2.cvtColor(label_img, cv2.COLOR_RGB2GRAY)
    
    # Resize Image to 100x280
    label_img = cv2.resize(label_img, (288, 288))
    
    # Flatten Image and Append 
    label_flat = label_img.flatten().tolist()
    y_valid += label_flat
    
    # Check Progress
    if (i%100 == 0):
        print('Images Loaded:', i)

# Check Size of Data
label_feature_size=int(len(y_valid))
print("Feature Size:", label_feature_size)

# Revert to the Original Image Shapes
y_valid = np.array(y_valid).reshape(2000, 288, 288, 1)
print("Feature Size:", y_valid.shape)
Images Loaded: 0
Images Loaded: 100
Images Loaded: 200
Images Loaded: 300
Images Loaded: 400
Images Loaded: 500
Images Loaded: 600
Images Loaded: 700
Images Loaded: 800
Images Loaded: 900
Images Loaded: 1000
Images Loaded: 1100
Images Loaded: 1200
Images Loaded: 1300
Images Loaded: 1400
Images Loaded: 1500
Images Loaded: 1600
Images Loaded: 1700
Images Loaded: 1800
Images Loaded: 1900
Feature Size: 165888000
Feature Size: (2000, 288, 288, 1)
In [14]:
# PREPROCESS DATA
# Clear Memory
del training_data_files

Create Model

In [15]:
# CREATE MODEL
# Import Modules
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.layers import UpSampling2D
import time
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean

# Define a Function for Convolutional Block
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    # Add First Layer
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal",
               padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    
    # Add Second Layer
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal",
               padding="same")(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

# Define a Function for U-Net Architecture
def vanilla_unet(input_img, n_filters=16, dropout=0.5, batchnorm=True):
    # Add Contracting Path
    c1 = conv2d_block(input_img, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling2D((2, 2)) (c1)
    p1 = Dropout(dropout*0.5)(p1)

    c2 = conv2d_block(p1, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2)) (c2)
    p2 = Dropout(dropout)(p2)

    c3 = conv2d_block(p2, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2)) (c3)
    p3 = Dropout(dropout)(p3)

    c4 = conv2d_block(p3, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
    p4 = Dropout(dropout)(p4)
    
    c5 = conv2d_block(p4, n_filters=n_filters*16, kernel_size=3, batchnorm=batchnorm)
    
    # Add Expansive Path
    u6 = Conv2DTranspose(n_filters*8, (3, 3), strides=(2, 2), padding='same') (c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    c6 = conv2d_block(u6, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)

    u7 = Conv2DTranspose(n_filters*4, (3, 3), strides=(2, 2), padding='same') (c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    c7 = conv2d_block(u7, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)

    u8 = Conv2DTranspose(n_filters*2, (3, 3), strides=(2, 2), padding='same') (c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)

    u9 = Conv2DTranspose(n_filters*1, (3, 3), strides=(2, 2), padding='same') (c8)
    u9 = concatenate([u9, c1], axis=3)
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)
    
    outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
In [16]:
# CREATE NETWORK
# Import Modules
import tensorflow as tf
from keras import backend as K

# Define a Function for Dice Coefficient Computation
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)

# Define a Function for Dice Coefficient Loss
def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
In [17]:
# CREATE MODEL
# Instantiate Network
input_img = Input((288, 288, 3), name='img')
model = vanilla_unet(input_img, n_filters=32, dropout=0.05, batchnorm=True)
model.compile(optimizer = Adam(), loss = dice_coef_loss, metrics = ["accuracy", "binary_accuracy", "mse"])

# Print Summary
model.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
img (InputLayer)                (None, 288, 288, 3)  0                                            
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 288, 288, 32) 896         img[0][0]                        
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 288, 288, 32) 128         conv2d_1[0][0]                   
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 288, 288, 32) 0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 288, 288, 32) 9248        activation_1[0][0]               
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 288, 288, 32) 128         conv2d_2[0][0]                   
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 288, 288, 32) 0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 144, 144, 32) 0           activation_2[0][0]               
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 144, 144, 32) 0           max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 144, 144, 64) 18496       dropout_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 144, 144, 64) 256         conv2d_3[0][0]                   
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 144, 144, 64) 0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 144, 144, 64) 36928       activation_3[0][0]               
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 144, 144, 64) 256         conv2d_4[0][0]                   
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 144, 144, 64) 0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 72, 72, 64)   0           activation_4[0][0]               
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 72, 72, 64)   0           max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 72, 72, 128)  73856       dropout_2[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 72, 72, 128)  512         conv2d_5[0][0]                   
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 72, 72, 128)  0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 72, 72, 128)  147584      activation_5[0][0]               
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 72, 72, 128)  512         conv2d_6[0][0]                   
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 72, 72, 128)  0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 36, 36, 128)  0           activation_6[0][0]               
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 36, 36, 128)  0           max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 36, 36, 256)  295168      dropout_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 36, 36, 256)  1024        conv2d_7[0][0]                   
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 36, 36, 256)  0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 36, 36, 256)  590080      activation_7[0][0]               
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 36, 36, 256)  1024        conv2d_8[0][0]                   
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 36, 36, 256)  0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)  (None, 18, 18, 256)  0           activation_8[0][0]               
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 18, 18, 256)  0           max_pooling2d_4[0][0]            
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 18, 18, 512)  1180160     dropout_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 18, 18, 512)  2048        conv2d_9[0][0]                   
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 18, 18, 512)  0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 18, 18, 512)  2359808     activation_9[0][0]               
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 18, 18, 512)  2048        conv2d_10[0][0]                  
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 18, 18, 512)  0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 36, 36, 256)  1179904     activation_10[0][0]              
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 36, 36, 512)  0           conv2d_transpose_1[0][0]         
                                                                 activation_8[0][0]               
__________________________________________________________________________________________________
dropout_5 (Dropout)             (None, 36, 36, 512)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 36, 36, 256)  1179904     dropout_5[0][0]                  
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 36, 36, 256)  1024        conv2d_11[0][0]                  
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 36, 36, 256)  0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 36, 36, 256)  590080      activation_11[0][0]              
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 36, 36, 256)  1024        conv2d_12[0][0]                  
__________________________________________________________________________________________________
activation_12 (Activation)      (None, 36, 36, 256)  0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 72, 72, 128)  295040      activation_12[0][0]              
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 72, 72, 256)  0           conv2d_transpose_2[0][0]         
                                                                 activation_6[0][0]               
__________________________________________________________________________________________________
dropout_6 (Dropout)             (None, 72, 72, 256)  0           concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 72, 72, 128)  295040      dropout_6[0][0]                  
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 72, 72, 128)  512         conv2d_13[0][0]                  
__________________________________________________________________________________________________
activation_13 (Activation)      (None, 72, 72, 128)  0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 72, 72, 128)  147584      activation_13[0][0]              
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 72, 72, 128)  512         conv2d_14[0][0]                  
__________________________________________________________________________________________________
activation_14 (Activation)      (None, 72, 72, 128)  0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_transpose_3 (Conv2DTrans (None, 144, 144, 64) 73792       activation_14[0][0]              
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 144, 144, 128 0           conv2d_transpose_3[0][0]         
                                                                 activation_4[0][0]               
__________________________________________________________________________________________________
dropout_7 (Dropout)             (None, 144, 144, 128 0           concatenate_3[0][0]              
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 144, 144, 64) 73792       dropout_7[0][0]                  
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 144, 144, 64) 256         conv2d_15[0][0]                  
__________________________________________________________________________________________________
activation_15 (Activation)      (None, 144, 144, 64) 0           batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 144, 144, 64) 36928       activation_15[0][0]              
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 144, 144, 64) 256         conv2d_16[0][0]                  
__________________________________________________________________________________________________
activation_16 (Activation)      (None, 144, 144, 64) 0           batch_normalization_16[0][0]     
__________________________________________________________________________________________________
conv2d_transpose_4 (Conv2DTrans (None, 288, 288, 32) 18464       activation_16[0][0]              
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 288, 288, 64) 0           conv2d_transpose_4[0][0]         
                                                                 activation_2[0][0]               
__________________________________________________________________________________________________
dropout_8 (Dropout)             (None, 288, 288, 64) 0           concatenate_4[0][0]              
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 288, 288, 32) 18464       dropout_8[0][0]                  
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 288, 288, 32) 128         conv2d_17[0][0]                  
__________________________________________________________________________________________________
activation_17 (Activation)      (None, 288, 288, 32) 0           batch_normalization_17[0][0]     
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 288, 288, 32) 9248        activation_17[0][0]              
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 288, 288, 32) 128         conv2d_18[0][0]                  
__________________________________________________________________________________________________
activation_18 (Activation)      (None, 288, 288, 32) 0           batch_normalization_18[0][0]     
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 288, 288, 1)  33          activation_18[0][0]              
==================================================================================================
Total params: 8,642,273
Trainable params: 8,636,385
Non-trainable params: 5,888
__________________________________________________________________________________________________
In [18]:
# CREATE MODEL
# Import Modules
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# Create Callbacks
callbacks = [
    EarlyStopping(patience=10, verbose=1),
    ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1),
    ModelCheckpoint('model-tgs-salt-dice-15-10-2018.h5', verbose=1, save_best_only=True, save_weights_only=True)
]

Set GPU Settings

In [19]:
# SET GPU SETTINGS
# Configure
config=tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allocator_type='BFC'
config.gpu_options.per_process_gpu_memory_fraction=0.80

Train

In [20]:
# TRAIN
# Train the Network
results = model.fit(x_train, y_train, batch_size=16, epochs=150, callbacks=callbacks,
                    validation_data=(x_valid, y_valid))
Train on 10000 samples, validate on 2000 samples
Epoch 1/150
10000/10000 [==============================] - 421s 42ms/step - loss: -1.6981 - acc: 0.6492 - binary_accuracy: 0.6492 - mean_squared_error: 462.2163 - val_loss: -1.1583 - val_acc: 0.7304 - val_binary_accuracy: 0.7304 - val_mean_squared_error: 150.1620

Epoch 00001: val_loss improved from inf to -1.15830, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 2/150
10000/10000 [==============================] - 396s 40ms/step - loss: -1.7894 - acc: 0.7619 - binary_accuracy: 0.7619 - mean_squared_error: 462.0464 - val_loss: -0.9515 - val_acc: 0.5409 - val_binary_accuracy: 0.5409 - val_mean_squared_error: 150.3250

Epoch 00002: val_loss did not improve from -1.15830
Epoch 3/150
10000/10000 [==============================] - 338s 34ms/step - loss: -1.8348 - acc: 0.8162 - binary_accuracy: 0.8162 - mean_squared_error: 461.9421 - val_loss: -1.4401 - val_acc: 0.8746 - val_binary_accuracy: 0.8746 - val_mean_squared_error: 150.0313

Epoch 00003: val_loss improved from -1.15830 to -1.44015, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 4/150
10000/10000 [==============================] - 350s 35ms/step - loss: -1.8437 - acc: 0.8312 - binary_accuracy: 0.8312 - mean_squared_error: 461.9210 - val_loss: -1.2365 - val_acc: 0.8416 - val_binary_accuracy: 0.8416 - val_mean_squared_error: 150.1457

Epoch 00004: val_loss did not improve from -1.44015
Epoch 5/150
10000/10000 [==============================] - 343s 34ms/step - loss: -1.8753 - acc: 0.8681 - binary_accuracy: 0.8681 - mean_squared_error: 461.8591 - val_loss: -1.6651 - val_acc: 0.9395 - val_binary_accuracy: 0.9395 - val_mean_squared_error: 149.9573

Epoch 00005: val_loss improved from -1.44015 to -1.66512, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 6/150
10000/10000 [==============================] - 338s 34ms/step - loss: -1.8791 - acc: 0.8748 - binary_accuracy: 0.8748 - mean_squared_error: 461.8541 - val_loss: -1.4566 - val_acc: 0.8672 - val_binary_accuracy: 0.8672 - val_mean_squared_error: 150.0234

Epoch 00006: val_loss did not improve from -1.66512
Epoch 7/150
10000/10000 [==============================] - 426s 43ms/step - loss: -1.8837 - acc: 0.8797 - binary_accuracy: 0.8797 - mean_squared_error: 461.8435 - val_loss: -1.6376 - val_acc: 0.9092 - val_binary_accuracy: 0.9092 - val_mean_squared_error: 149.9757

Epoch 00007: val_loss did not improve from -1.66512
Epoch 8/150
10000/10000 [==============================] - 489s 49ms/step - loss: -1.9007 - acc: 0.8925 - binary_accuracy: 0.8925 - mean_squared_error: 461.7870 - val_loss: -1.6962 - val_acc: 0.9459 - val_binary_accuracy: 0.9459 - val_mean_squared_error: 149.9512

Epoch 00008: val_loss improved from -1.66512 to -1.69620, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 9/150
10000/10000 [==============================] - 586s 59ms/step - loss: -1.9056 - acc: 0.8994 - binary_accuracy: 0.8994 - mean_squared_error: 461.7807 - val_loss: -1.2199 - val_acc: 0.7913 - val_binary_accuracy: 0.7913 - val_mean_squared_error: 150.0890

Epoch 00009: val_loss did not improve from -1.69620
Epoch 10/150
10000/10000 [==============================] - 579s 58ms/step - loss: -1.9163 - acc: 0.9093 - binary_accuracy: 0.9093 - mean_squared_error: 461.7499 - val_loss: -1.7022 - val_acc: 0.9296 - val_binary_accuracy: 0.9296 - val_mean_squared_error: 149.9511

Epoch 00010: val_loss improved from -1.69620 to -1.70216, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 11/150
10000/10000 [==============================] - 661s 66ms/step - loss: -1.9154 - acc: 0.9110 - binary_accuracy: 0.9110 - mean_squared_error: 461.7696 - val_loss: -1.7991 - val_acc: 0.9666 - val_binary_accuracy: 0.9666 - val_mean_squared_error: 149.9281

Epoch 00011: val_loss improved from -1.70216 to -1.79910, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 12/150
10000/10000 [==============================] - 625s 62ms/step - loss: -1.9051 - acc: 0.8981 - binary_accuracy: 0.8981 - mean_squared_error: 461.7865 - val_loss: -1.7897 - val_acc: 0.9706 - val_binary_accuracy: 0.9706 - val_mean_squared_error: 149.9495

Epoch 00012: val_loss did not improve from -1.79910
Epoch 13/150
10000/10000 [==============================] - 688s 69ms/step - loss: -1.9252 - acc: 0.9199 - binary_accuracy: 0.9199 - mean_squared_error: 461.7419 - val_loss: -1.7048 - val_acc: 0.9387 - val_binary_accuracy: 0.9387 - val_mean_squared_error: 149.9669

Epoch 00013: val_loss did not improve from -1.79910
Epoch 14/150
10000/10000 [==============================] - 629s 63ms/step - loss: -1.9287 - acc: 0.9222 - binary_accuracy: 0.9222 - mean_squared_error: 461.7270 - val_loss: -1.8019 - val_acc: 0.9612 - val_binary_accuracy: 0.9612 - val_mean_squared_error: 149.9236

Epoch 00014: val_loss improved from -1.79910 to -1.80188, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 15/150
10000/10000 [==============================] - 637s 64ms/step - loss: -1.9302 - acc: 0.9231 - binary_accuracy: 0.9231 - mean_squared_error: 461.7265 - val_loss: -1.0855 - val_acc: 0.9479 - val_binary_accuracy: 0.9479 - val_mean_squared_error: 150.2156

Epoch 00015: val_loss did not improve from -1.80188
Epoch 16/150
10000/10000 [==============================] - 643s 64ms/step - loss: -1.9330 - acc: 0.9242 - binary_accuracy: 0.9242 - mean_squared_error: 461.7174 - val_loss: -1.8391 - val_acc: 0.9711 - val_binary_accuracy: 0.9711 - val_mean_squared_error: 149.9141

Epoch 00016: val_loss improved from -1.80188 to -1.83913, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 17/150
10000/10000 [==============================] - 701s 70ms/step - loss: -1.9344 - acc: 0.9259 - binary_accuracy: 0.9259 - mean_squared_error: 461.7126 - val_loss: -1.7672 - val_acc: 0.9518 - val_binary_accuracy: 0.9518 - val_mean_squared_error: 149.9227

Epoch 00017: val_loss did not improve from -1.83913
Epoch 18/150
10000/10000 [==============================] - 656s 66ms/step - loss: -1.9426 - acc: 0.9336 - binary_accuracy: 0.9336 - mean_squared_error: 461.6934 - val_loss: -1.7809 - val_acc: 0.9577 - val_binary_accuracy: 0.9577 - val_mean_squared_error: 149.9196

Epoch 00018: val_loss did not improve from -1.83913
Epoch 19/150
10000/10000 [==============================] - 673s 67ms/step - loss: -1.9342 - acc: 0.9281 - binary_accuracy: 0.9281 - mean_squared_error: 461.7254 - val_loss: -1.5791 - val_acc: 0.9088 - val_binary_accuracy: 0.9088 - val_mean_squared_error: 149.9623

Epoch 00019: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513.

Epoch 00019: val_loss did not improve from -1.83913
Epoch 20/150
10000/10000 [==============================] - 654s 65ms/step - loss: -1.9476 - acc: 0.9380 - binary_accuracy: 0.9380 - mean_squared_error: 461.6834 - val_loss: -1.8482 - val_acc: 0.9685 - val_binary_accuracy: 0.9685 - val_mean_squared_error: 149.9080

Epoch 00020: val_loss improved from -1.83913 to -1.84817, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 21/150
10000/10000 [==============================] - 723s 72ms/step - loss: -1.9519 - acc: 0.9411 - binary_accuracy: 0.9411 - mean_squared_error: 461.6738 - val_loss: -1.8625 - val_acc: 0.9720 - val_binary_accuracy: 0.9720 - val_mean_squared_error: 149.9051

Epoch 00021: val_loss improved from -1.84817 to -1.86250, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 22/150
10000/10000 [==============================] - 716s 72ms/step - loss: -1.9527 - acc: 0.9426 - binary_accuracy: 0.9426 - mean_squared_error: 461.6746 - val_loss: -1.8648 - val_acc: 0.9729 - val_binary_accuracy: 0.9729 - val_mean_squared_error: 149.9040

Epoch 00022: val_loss improved from -1.86250 to -1.86481, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 23/150
10000/10000 [==============================] - 647s 65ms/step - loss: -1.9547 - acc: 0.9440 - binary_accuracy: 0.9440 - mean_squared_error: 461.6670 - val_loss: -1.8691 - val_acc: 0.9748 - val_binary_accuracy: 0.9748 - val_mean_squared_error: 149.9050

Epoch 00023: val_loss improved from -1.86481 to -1.86910, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 24/150
10000/10000 [==============================] - 661s 66ms/step - loss: -1.9561 - acc: 0.9450 - binary_accuracy: 0.9450 - mean_squared_error: 461.6660 - val_loss: -1.8682 - val_acc: 0.9739 - val_binary_accuracy: 0.9739 - val_mean_squared_error: 149.9026

Epoch 00024: val_loss did not improve from -1.86910
Epoch 25/150
10000/10000 [==============================] - 698s 70ms/step - loss: -1.9556 - acc: 0.9452 - binary_accuracy: 0.9452 - mean_squared_error: 461.6644 - val_loss: -1.8753 - val_acc: 0.9752 - val_binary_accuracy: 0.9752 - val_mean_squared_error: 149.9019

Epoch 00025: val_loss improved from -1.86910 to -1.87531, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 26/150
10000/10000 [==============================] - 675s 68ms/step - loss: -1.9564 - acc: 0.9460 - binary_accuracy: 0.9460 - mean_squared_error: 461.6652 - val_loss: -1.8663 - val_acc: 0.9732 - val_binary_accuracy: 0.9732 - val_mean_squared_error: 149.9022

Epoch 00026: val_loss did not improve from -1.87531
Epoch 27/150
10000/10000 [==============================] - 717s 72ms/step - loss: -1.9577 - acc: 0.9470 - binary_accuracy: 0.9470 - mean_squared_error: 461.6624 - val_loss: -1.8696 - val_acc: 0.9734 - val_binary_accuracy: 0.9734 - val_mean_squared_error: 149.9017

Epoch 00027: val_loss did not improve from -1.87531
Epoch 28/150
10000/10000 [==============================] - 679s 68ms/step - loss: -1.9571 - acc: 0.9467 - binary_accuracy: 0.9467 - mean_squared_error: 461.6633 - val_loss: -1.8791 - val_acc: 0.9761 - val_binary_accuracy: 0.9761 - val_mean_squared_error: 149.9008

Epoch 00028: val_loss improved from -1.87531 to -1.87909, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 29/150
10000/10000 [==============================] - 706s 71ms/step - loss: -1.9589 - acc: 0.9478 - binary_accuracy: 0.9478 - mean_squared_error: 461.6595 - val_loss: -1.8777 - val_acc: 0.9754 - val_binary_accuracy: 0.9754 - val_mean_squared_error: 149.9012

Epoch 00029: val_loss did not improve from -1.87909
Epoch 30/150
10000/10000 [==============================] - 697s 70ms/step - loss: -1.9593 - acc: 0.9481 - binary_accuracy: 0.9481 - mean_squared_error: 461.6581 - val_loss: -1.8814 - val_acc: 0.9765 - val_binary_accuracy: 0.9765 - val_mean_squared_error: 149.8998

Epoch 00030: val_loss improved from -1.87909 to -1.88137, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 31/150
10000/10000 [==============================] - 709s 71ms/step - loss: -1.9599 - acc: 0.9489 - binary_accuracy: 0.9489 - mean_squared_error: 461.6579 - val_loss: -1.8841 - val_acc: 0.9766 - val_binary_accuracy: 0.9766 - val_mean_squared_error: 149.8997

Epoch 00031: val_loss improved from -1.88137 to -1.88410, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 32/150
10000/10000 [==============================] - 719s 72ms/step - loss: -1.9596 - acc: 0.9488 - binary_accuracy: 0.9488 - mean_squared_error: 461.6594 - val_loss: -1.8848 - val_acc: 0.9762 - val_binary_accuracy: 0.9762 - val_mean_squared_error: 149.8996

Epoch 00032: val_loss improved from -1.88410 to -1.88482, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 33/150
10000/10000 [==============================] - 690s 69ms/step - loss: -1.9607 - acc: 0.9495 - binary_accuracy: 0.9495 - mean_squared_error: 461.6563 - val_loss: -1.8883 - val_acc: 0.9777 - val_binary_accuracy: 0.9777 - val_mean_squared_error: 149.8993

Epoch 00033: val_loss improved from -1.88482 to -1.88833, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 34/150
10000/10000 [==============================] - 704s 70ms/step - loss: -1.9606 - acc: 0.9496 - binary_accuracy: 0.9496 - mean_squared_error: 461.6559 - val_loss: -1.8883 - val_acc: 0.9775 - val_binary_accuracy: 0.9775 - val_mean_squared_error: 149.8987

Epoch 00034: val_loss did not improve from -1.88833
Epoch 35/150
10000/10000 [==============================] - 700s 70ms/step - loss: -1.9613 - acc: 0.9505 - binary_accuracy: 0.9505 - mean_squared_error: 461.6551 - val_loss: -1.8910 - val_acc: 0.9774 - val_binary_accuracy: 0.9774 - val_mean_squared_error: 149.8984

Epoch 00035: val_loss improved from -1.88833 to -1.89100, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 36/150
10000/10000 [==============================] - 710s 71ms/step - loss: -1.9615 - acc: 0.9505 - binary_accuracy: 0.9505 - mean_squared_error: 461.6543 - val_loss: -1.8883 - val_acc: 0.9767 - val_binary_accuracy: 0.9767 - val_mean_squared_error: 149.8984

Epoch 00036: val_loss did not improve from -1.89100
Epoch 37/150
10000/10000 [==============================] - 728s 73ms/step - loss: -1.9618 - acc: 0.9508 - binary_accuracy: 0.9508 - mean_squared_error: 461.6536 - val_loss: -1.8897 - val_acc: 0.9768 - val_binary_accuracy: 0.9768 - val_mean_squared_error: 149.8982

Epoch 00037: val_loss did not improve from -1.89100
Epoch 38/150
10000/10000 [==============================] - 719s 72ms/step - loss: -1.9624 - acc: 0.9512 - binary_accuracy: 0.9512 - mean_squared_error: 461.6536 - val_loss: -1.8947 - val_acc: 0.9781 - val_binary_accuracy: 0.9781 - val_mean_squared_error: 149.8975

Epoch 00038: val_loss improved from -1.89100 to -1.89471, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 39/150
10000/10000 [==============================] - 704s 70ms/step - loss: -1.9627 - acc: 0.9514 - binary_accuracy: 0.9514 - mean_squared_error: 461.6530 - val_loss: -1.8889 - val_acc: 0.9762 - val_binary_accuracy: 0.9762 - val_mean_squared_error: 149.8982

Epoch 00039: val_loss did not improve from -1.89471
Epoch 40/150
10000/10000 [==============================] - 703s 70ms/step - loss: -1.9631 - acc: 0.9518 - binary_accuracy: 0.9518 - mean_squared_error: 461.6519 - val_loss: -1.8918 - val_acc: 0.9772 - val_binary_accuracy: 0.9772 - val_mean_squared_error: 149.8973

Epoch 00040: val_loss did not improve from -1.89471
Epoch 41/150
10000/10000 [==============================] - 724s 72ms/step - loss: -1.9636 - acc: 0.9525 - binary_accuracy: 0.9525 - mean_squared_error: 461.6508 - val_loss: -1.9017 - val_acc: 0.9789 - val_binary_accuracy: 0.9789 - val_mean_squared_error: 149.8964

Epoch 00041: val_loss improved from -1.89471 to -1.90166, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 42/150
10000/10000 [==============================] - 712s 71ms/step - loss: -1.9632 - acc: 0.9521 - binary_accuracy: 0.9521 - mean_squared_error: 461.6525 - val_loss: -1.8982 - val_acc: 0.9777 - val_binary_accuracy: 0.9777 - val_mean_squared_error: 149.8967

Epoch 00042: val_loss did not improve from -1.90166
Epoch 43/150
10000/10000 [==============================] - 738s 74ms/step - loss: -1.9639 - acc: 0.9526 - binary_accuracy: 0.9526 - mean_squared_error: 461.6507 - val_loss: -1.9043 - val_acc: 0.9794 - val_binary_accuracy: 0.9794 - val_mean_squared_error: 149.8960

Epoch 00043: val_loss improved from -1.90166 to -1.90425, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 44/150
10000/10000 [==============================] - 705s 70ms/step - loss: -1.9640 - acc: 0.9531 - binary_accuracy: 0.9531 - mean_squared_error: 461.6502 - val_loss: -1.9049 - val_acc: 0.9800 - val_binary_accuracy: 0.9800 - val_mean_squared_error: 149.8964

Epoch 00044: val_loss improved from -1.90425 to -1.90490, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 45/150
10000/10000 [==============================] - 733s 73ms/step - loss: -1.9639 - acc: 0.9530 - binary_accuracy: 0.9530 - mean_squared_error: 461.6512 - val_loss: -1.9058 - val_acc: 0.9805 - val_binary_accuracy: 0.9805 - val_mean_squared_error: 149.8968

Epoch 00045: val_loss improved from -1.90490 to -1.90582, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 46/150
10000/10000 [==============================] - 713s 71ms/step - loss: -1.9645 - acc: 0.9535 - binary_accuracy: 0.9535 - mean_squared_error: 461.6494 - val_loss: -1.9009 - val_acc: 0.9784 - val_binary_accuracy: 0.9784 - val_mean_squared_error: 149.8958

Epoch 00046: val_loss did not improve from -1.90582
Epoch 47/150
10000/10000 [==============================] - 747s 75ms/step - loss: -1.9645 - acc: 0.9538 - binary_accuracy: 0.9538 - mean_squared_error: 461.6489 - val_loss: -1.9080 - val_acc: 0.9798 - val_binary_accuracy: 0.9798 - val_mean_squared_error: 149.8955

Epoch 00047: val_loss improved from -1.90582 to -1.90801, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 48/150
10000/10000 [==============================] - 730s 73ms/step - loss: -1.9648 - acc: 0.9540 - binary_accuracy: 0.9540 - mean_squared_error: 461.6483 - val_loss: -1.9107 - val_acc: 0.9810 - val_binary_accuracy: 0.9810 - val_mean_squared_error: 149.8956

Epoch 00048: val_loss improved from -1.90801 to -1.91072, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 49/150
10000/10000 [==============================] - 730s 73ms/step - loss: -1.9650 - acc: 0.9543 - binary_accuracy: 0.9543 - mean_squared_error: 461.6484 - val_loss: -1.9025 - val_acc: 0.9786 - val_binary_accuracy: 0.9786 - val_mean_squared_error: 149.8967

Epoch 00049: val_loss did not improve from -1.91072
Epoch 50/150
10000/10000 [==============================] - 718s 72ms/step - loss: -1.9645 - acc: 0.9537 - binary_accuracy: 0.9537 - mean_squared_error: 461.6495 - val_loss: -1.9073 - val_acc: 0.9799 - val_binary_accuracy: 0.9799 - val_mean_squared_error: 149.8948

Epoch 00050: val_loss did not improve from -1.91072
Epoch 51/150
10000/10000 [==============================] - 734s 73ms/step - loss: -1.9656 - acc: 0.9545 - binary_accuracy: 0.9545 - mean_squared_error: 461.6479 - val_loss: -1.9145 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8948

Epoch 00051: val_loss improved from -1.91072 to -1.91452, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 52/150
10000/10000 [==============================] - 732s 73ms/step - loss: -1.9657 - acc: 0.9547 - binary_accuracy: 0.9547 - mean_squared_error: 461.6475 - val_loss: -1.9094 - val_acc: 0.9797 - val_binary_accuracy: 0.9797 - val_mean_squared_error: 149.8945

Epoch 00052: val_loss did not improve from -1.91452
Epoch 53/150
10000/10000 [==============================] - 745s 75ms/step - loss: -1.9658 - acc: 0.9549 - binary_accuracy: 0.9549 - mean_squared_error: 461.6472 - val_loss: -1.9118 - val_acc: 0.9798 - val_binary_accuracy: 0.9798 - val_mean_squared_error: 149.8942

Epoch 00053: val_loss did not improve from -1.91452
Epoch 54/150
10000/10000 [==============================] - 724s 72ms/step - loss: -1.9663 - acc: 0.9552 - binary_accuracy: 0.9552 - mean_squared_error: 461.6463 - val_loss: -1.9087 - val_acc: 0.9793 - val_binary_accuracy: 0.9793 - val_mean_squared_error: 149.8950

Epoch 00054: ReduceLROnPlateau reducing learning rate to 1.0000000474974514e-05.

Epoch 00054: val_loss did not improve from -1.91452
Epoch 55/150
10000/10000 [==============================] - 738s 74ms/step - loss: -1.9665 - acc: 0.9557 - binary_accuracy: 0.9557 - mean_squared_error: 461.6460 - val_loss: -1.9144 - val_acc: 0.9806 - val_binary_accuracy: 0.9806 - val_mean_squared_error: 149.8936

Epoch 00055: val_loss did not improve from -1.91452
Epoch 56/150
10000/10000 [==============================] - 716s 72ms/step - loss: -1.9671 - acc: 0.9559 - binary_accuracy: 0.9559 - mean_squared_error: 461.6451 - val_loss: -1.9166 - val_acc: 0.9811 - val_binary_accuracy: 0.9811 - val_mean_squared_error: 149.8934

Epoch 00056: val_loss improved from -1.91452 to -1.91661, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 57/150
10000/10000 [==============================] - 746s 75ms/step - loss: -1.9668 - acc: 0.9560 - binary_accuracy: 0.9560 - mean_squared_error: 461.6454 - val_loss: -1.9163 - val_acc: 0.9810 - val_binary_accuracy: 0.9810 - val_mean_squared_error: 149.8934

Epoch 00057: val_loss did not improve from -1.91661
Epoch 58/150
10000/10000 [==============================] - 726s 73ms/step - loss: -1.9671 - acc: 0.9561 - binary_accuracy: 0.9561 - mean_squared_error: 461.6451 - val_loss: -1.9169 - val_acc: 0.9809 - val_binary_accuracy: 0.9809 - val_mean_squared_error: 149.8934

Epoch 00058: val_loss improved from -1.91661 to -1.91692, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 59/150
10000/10000 [==============================] - 739s 74ms/step - loss: -1.9668 - acc: 0.9561 - binary_accuracy: 0.9561 - mean_squared_error: 461.6449 - val_loss: -1.9171 - val_acc: 0.9811 - val_binary_accuracy: 0.9811 - val_mean_squared_error: 149.8933

Epoch 00059: val_loss improved from -1.91692 to -1.91710, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 60/150
10000/10000 [==============================] - 717s 72ms/step - loss: -1.9671 - acc: 0.9562 - binary_accuracy: 0.9562 - mean_squared_error: 461.6449 - val_loss: -1.9177 - val_acc: 0.9812 - val_binary_accuracy: 0.9812 - val_mean_squared_error: 149.8932

Epoch 00060: val_loss improved from -1.91710 to -1.91768, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 61/150
10000/10000 [==============================] - 735s 73ms/step - loss: -1.9674 - acc: 0.9563 - binary_accuracy: 0.9563 - mean_squared_error: 461.6447 - val_loss: -1.9170 - val_acc: 0.9810 - val_binary_accuracy: 0.9810 - val_mean_squared_error: 149.8933

Epoch 00061: val_loss did not improve from -1.91768
Epoch 62/150
10000/10000 [==============================] - 734s 73ms/step - loss: -1.9672 - acc: 0.9562 - binary_accuracy: 0.9562 - mean_squared_error: 461.6447 - val_loss: -1.9183 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8932

Epoch 00062: val_loss improved from -1.91768 to -1.91826, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 63/150
10000/10000 [==============================] - 735s 73ms/step - loss: -1.9669 - acc: 0.9564 - binary_accuracy: 0.9564 - mean_squared_error: 461.6446 - val_loss: -1.9174 - val_acc: 0.9812 - val_binary_accuracy: 0.9812 - val_mean_squared_error: 149.8932

Epoch 00063: val_loss did not improve from -1.91826
Epoch 64/150
10000/10000 [==============================] - 715s 72ms/step - loss: -1.9673 - acc: 0.9563 - binary_accuracy: 0.9563 - mean_squared_error: 461.6442 - val_loss: -1.9176 - val_acc: 0.9812 - val_binary_accuracy: 0.9812 - val_mean_squared_error: 149.8932

Epoch 00064: val_loss did not improve from -1.91826
Epoch 65/150
10000/10000 [==============================] - 737s 74ms/step - loss: -1.9675 - acc: 0.9564 - binary_accuracy: 0.9564 - mean_squared_error: 461.6447 - val_loss: -1.9178 - val_acc: 0.9811 - val_binary_accuracy: 0.9811 - val_mean_squared_error: 149.8931

Epoch 00065: ReduceLROnPlateau reducing learning rate to 1e-05.

Epoch 00065: val_loss did not improve from -1.91826
Epoch 66/150
10000/10000 [==============================] - 726s 73ms/step - loss: -1.9671 - acc: 0.9565 - binary_accuracy: 0.9565 - mean_squared_error: 461.6444 - val_loss: -1.9183 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8930

Epoch 00066: val_loss improved from -1.91826 to -1.91832, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 67/150
10000/10000 [==============================] - 750s 75ms/step - loss: -1.9671 - acc: 0.9565 - binary_accuracy: 0.9565 - mean_squared_error: 461.6446 - val_loss: -1.9179 - val_acc: 0.9812 - val_binary_accuracy: 0.9812 - val_mean_squared_error: 149.8932

Epoch 00067: val_loss did not improve from -1.91832
Epoch 68/150
10000/10000 [==============================] - 732s 73ms/step - loss: -1.9672 - acc: 0.9564 - binary_accuracy: 0.9564 - mean_squared_error: 461.6445 - val_loss: -1.9178 - val_acc: 0.9811 - val_binary_accuracy: 0.9811 - val_mean_squared_error: 149.8931

Epoch 00068: val_loss did not improve from -1.91832
Epoch 69/150
10000/10000 [==============================] - 741s 74ms/step - loss: -1.9673 - acc: 0.9566 - binary_accuracy: 0.9566 - mean_squared_error: 461.6446 - val_loss: -1.9184 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8930

Epoch 00069: val_loss improved from -1.91832 to -1.91837, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 70/150
10000/10000 [==============================] - 715s 72ms/step - loss: -1.9675 - acc: 0.9565 - binary_accuracy: 0.9565 - mean_squared_error: 461.6443 - val_loss: -1.9184 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8931

Epoch 00070: val_loss improved from -1.91837 to -1.91838, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 71/150
10000/10000 [==============================] - 750s 75ms/step - loss: -1.9675 - acc: 0.9567 - binary_accuracy: 0.9567 - mean_squared_error: 461.6443 - val_loss: -1.9185 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8930

Epoch 00071: val_loss improved from -1.91838 to -1.91852, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 72/150
10000/10000 [==============================] - 730s 73ms/step - loss: -1.9675 - acc: 0.9566 - binary_accuracy: 0.9566 - mean_squared_error: 461.6438 - val_loss: -1.9183 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8929

Epoch 00072: val_loss did not improve from -1.91852
Epoch 73/150
10000/10000 [==============================] - 746s 75ms/step - loss: -1.9675 - acc: 0.9568 - binary_accuracy: 0.9568 - mean_squared_error: 461.6441 - val_loss: -1.9183 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8930

Epoch 00073: val_loss did not improve from -1.91852
Epoch 74/150
10000/10000 [==============================] - 709s 71ms/step - loss: -1.9673 - acc: 0.9567 - binary_accuracy: 0.9567 - mean_squared_error: 461.6441 - val_loss: -1.9183 - val_acc: 0.9811 - val_binary_accuracy: 0.9811 - val_mean_squared_error: 149.8929

Epoch 00074: val_loss did not improve from -1.91852
Epoch 75/150
10000/10000 [==============================] - 753s 75ms/step - loss: -1.9678 - acc: 0.9567 - binary_accuracy: 0.9567 - mean_squared_error: 461.6438 - val_loss: -1.9182 - val_acc: 0.9812 - val_binary_accuracy: 0.9812 - val_mean_squared_error: 149.8928

Epoch 00075: val_loss did not improve from -1.91852
Epoch 76/150
10000/10000 [==============================] - 718s 72ms/step - loss: -1.9676 - acc: 0.9567 - binary_accuracy: 0.9567 - mean_squared_error: 461.6440 - val_loss: -1.9198 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8929

Epoch 00076: val_loss improved from -1.91852 to -1.91979, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 77/150
10000/10000 [==============================] - 759s 76ms/step - loss: -1.9677 - acc: 0.9567 - binary_accuracy: 0.9567 - mean_squared_error: 461.6439 - val_loss: -1.9186 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8929

Epoch 00077: val_loss did not improve from -1.91979
Epoch 78/150
10000/10000 [==============================] - 736s 74ms/step - loss: -1.9673 - acc: 0.9568 - binary_accuracy: 0.9568 - mean_squared_error: 461.6441 - val_loss: -1.9196 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8928

Epoch 00078: val_loss did not improve from -1.91979
Epoch 79/150
10000/10000 [==============================] - 740s 74ms/step - loss: -1.9675 - acc: 0.9567 - binary_accuracy: 0.9567 - mean_squared_error: 461.6439 - val_loss: -1.9189 - val_acc: 0.9812 - val_binary_accuracy: 0.9812 - val_mean_squared_error: 149.8928

Epoch 00079: val_loss did not improve from -1.91979
Epoch 80/150
10000/10000 [==============================] - 730s 73ms/step - loss: -1.9679 - acc: 0.9569 - binary_accuracy: 0.9569 - mean_squared_error: 461.6438 - val_loss: -1.9194 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8927

Epoch 00080: val_loss did not improve from -1.91979
Epoch 81/150
10000/10000 [==============================] - 749s 75ms/step - loss: -1.9676 - acc: 0.9568 - binary_accuracy: 0.9568 - mean_squared_error: 461.6439 - val_loss: -1.9197 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8928

Epoch 00081: val_loss did not improve from -1.91979
Epoch 82/150
10000/10000 [==============================] - 739s 74ms/step - loss: -1.9677 - acc: 0.9568 - binary_accuracy: 0.9568 - mean_squared_error: 461.6437 - val_loss: -1.9199 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8927

Epoch 00082: val_loss improved from -1.91979 to -1.91994, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 83/150
10000/10000 [==============================] - 738s 74ms/step - loss: -1.9677 - acc: 0.9570 - binary_accuracy: 0.9570 - mean_squared_error: 461.6443 - val_loss: -1.9195 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8927

Epoch 00083: val_loss did not improve from -1.91994
Epoch 84/150
10000/10000 [==============================] - 731s 73ms/step - loss: -1.9678 - acc: 0.9569 - binary_accuracy: 0.9569 - mean_squared_error: 461.6436 - val_loss: -1.9201 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8926

Epoch 00084: val_loss improved from -1.91994 to -1.92014, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 85/150
10000/10000 [==============================] - 742s 74ms/step - loss: -1.9678 - acc: 0.9569 - binary_accuracy: 0.9569 - mean_squared_error: 461.6441 - val_loss: -1.9193 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8927

Epoch 00085: val_loss did not improve from -1.92014
Epoch 86/150
10000/10000 [==============================] - 725s 72ms/step - loss: -1.9680 - acc: 0.9569 - binary_accuracy: 0.9569 - mean_squared_error: 461.6435 - val_loss: -1.9195 - val_acc: 0.9813 - val_binary_accuracy: 0.9813 - val_mean_squared_error: 149.8926

Epoch 00086: val_loss did not improve from -1.92014
Epoch 87/150
10000/10000 [==============================] - 755s 75ms/step - loss: -1.9677 - acc: 0.9570 - binary_accuracy: 0.9570 - mean_squared_error: 461.6435 - val_loss: -1.9201 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8926

Epoch 00087: val_loss did not improve from -1.92014
Epoch 88/150
10000/10000 [==============================] - 724s 72ms/step - loss: -1.9679 - acc: 0.9570 - binary_accuracy: 0.9570 - mean_squared_error: 461.6434 - val_loss: -1.9198 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8926

Epoch 00088: val_loss did not improve from -1.92014
Epoch 89/150
10000/10000 [==============================] - 744s 74ms/step - loss: -1.9679 - acc: 0.9570 - binary_accuracy: 0.9570 - mean_squared_error: 461.6437 - val_loss: -1.9196 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8926

Epoch 00089: val_loss did not improve from -1.92014
Epoch 90/150
10000/10000 [==============================] - 733s 73ms/step - loss: -1.9682 - acc: 0.9571 - binary_accuracy: 0.9571 - mean_squared_error: 461.6434 - val_loss: -1.9200 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8926

Epoch 00090: val_loss did not improve from -1.92014
Epoch 91/150
10000/10000 [==============================] - 751s 75ms/step - loss: -1.9677 - acc: 0.9571 - binary_accuracy: 0.9571 - mean_squared_error: 461.6436 - val_loss: -1.9205 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8925

Epoch 00091: val_loss improved from -1.92014 to -1.92045, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 92/150
10000/10000 [==============================] - 740s 74ms/step - loss: -1.9679 - acc: 0.9570 - binary_accuracy: 0.9570 - mean_squared_error: 461.6430 - val_loss: -1.9203 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8925

Epoch 00092: val_loss did not improve from -1.92045
Epoch 93/150
10000/10000 [==============================] - 746s 75ms/step - loss: -1.9678 - acc: 0.9571 - binary_accuracy: 0.9571 - mean_squared_error: 461.6435 - val_loss: -1.9199 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8924

Epoch 00093: val_loss did not improve from -1.92045
Epoch 94/150
10000/10000 [==============================] - 741s 74ms/step - loss: -1.9679 - acc: 0.9571 - binary_accuracy: 0.9571 - mean_squared_error: 461.6432 - val_loss: -1.9218 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8925

Epoch 00094: val_loss improved from -1.92045 to -1.92179, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 95/150
10000/10000 [==============================] - 737s 74ms/step - loss: -1.9678 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6432 - val_loss: -1.9214 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8924

Epoch 00095: val_loss did not improve from -1.92179
Epoch 96/150
10000/10000 [==============================] - 736s 74ms/step - loss: -1.9682 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6430 - val_loss: -1.9216 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8924

Epoch 00096: val_loss did not improve from -1.92179
Epoch 97/150
10000/10000 [==============================] - 762s 76ms/step - loss: -1.9681 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6433 - val_loss: -1.9215 - val_acc: 0.9818 - val_binary_accuracy: 0.9818 - val_mean_squared_error: 149.8924

Epoch 00097: val_loss did not improve from -1.92179
Epoch 98/150
10000/10000 [==============================] - 739s 74ms/step - loss: -1.9680 - acc: 0.9570 - binary_accuracy: 0.9570 - mean_squared_error: 461.6432 - val_loss: -1.9211 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8924

Epoch 00098: val_loss did not improve from -1.92179
Epoch 99/150
10000/10000 [==============================] - 740s 74ms/step - loss: -1.9683 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6427 - val_loss: -1.9203 - val_acc: 0.9815 - val_binary_accuracy: 0.9815 - val_mean_squared_error: 149.8923

Epoch 00099: val_loss did not improve from -1.92179
Epoch 100/150
10000/10000 [==============================] - 730s 73ms/step - loss: -1.9682 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6428 - val_loss: -1.9211 - val_acc: 0.9818 - val_binary_accuracy: 0.9818 - val_mean_squared_error: 149.8924

Epoch 00100: val_loss did not improve from -1.92179
Epoch 101/150
10000/10000 [==============================] - 752s 75ms/step - loss: -1.9680 - acc: 0.9573 - binary_accuracy: 0.9573 - mean_squared_error: 461.6431 - val_loss: -1.9209 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8923

Epoch 00101: val_loss did not improve from -1.92179
Epoch 102/150
10000/10000 [==============================] - 727s 73ms/step - loss: -1.9681 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6429 - val_loss: -1.9218 - val_acc: 0.9818 - val_binary_accuracy: 0.9818 - val_mean_squared_error: 149.8922

Epoch 00102: val_loss improved from -1.92179 to -1.92181, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 103/150
10000/10000 [==============================] - 743s 74ms/step - loss: -1.9681 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6428 - val_loss: -1.9213 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8922

Epoch 00103: val_loss did not improve from -1.92181
Epoch 104/150
10000/10000 [==============================] - 727s 73ms/step - loss: -1.9680 - acc: 0.9572 - binary_accuracy: 0.9572 - mean_squared_error: 461.6429 - val_loss: -1.9217 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8923

Epoch 00104: val_loss did not improve from -1.92181
Epoch 105/150
10000/10000 [==============================] - 745s 75ms/step - loss: -1.9683 - acc: 0.9574 - binary_accuracy: 0.9574 - mean_squared_error: 461.6428 - val_loss: -1.9223 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8921

Epoch 00105: val_loss improved from -1.92181 to -1.92227, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 106/150
10000/10000 [==============================] - 742s 74ms/step - loss: -1.9683 - acc: 0.9574 - binary_accuracy: 0.9574 - mean_squared_error: 461.6428 - val_loss: -1.9209 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8921

Epoch 00106: val_loss did not improve from -1.92227
Epoch 107/150
10000/10000 [==============================] - 748s 75ms/step - loss: -1.9682 - acc: 0.9574 - binary_accuracy: 0.9574 - mean_squared_error: 461.6429 - val_loss: -1.9215 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8921

Epoch 00107: val_loss did not improve from -1.92227
Epoch 108/150
10000/10000 [==============================] - 732s 73ms/step - loss: -1.9683 - acc: 0.9573 - binary_accuracy: 0.9573 - mean_squared_error: 461.6427 - val_loss: -1.9220 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8921

Epoch 00108: val_loss did not improve from -1.92227
Epoch 109/150
10000/10000 [==============================] - 740s 74ms/step - loss: -1.9681 - acc: 0.9575 - binary_accuracy: 0.9575 - mean_squared_error: 461.6430 - val_loss: -1.9221 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8921

Epoch 00109: val_loss did not improve from -1.92227
Epoch 110/150
10000/10000 [==============================] - 724s 72ms/step - loss: -1.9682 - acc: 0.9575 - binary_accuracy: 0.9575 - mean_squared_error: 461.6424 - val_loss: -1.9217 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8920

Epoch 00110: val_loss did not improve from -1.92227
Epoch 111/150
10000/10000 [==============================] - 759s 76ms/step - loss: -1.9681 - acc: 0.9575 - binary_accuracy: 0.9575 - mean_squared_error: 461.6425 - val_loss: -1.9232 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8920

Epoch 00111: val_loss improved from -1.92227 to -1.92322, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 112/150
10000/10000 [==============================] - 738s 74ms/step - loss: -1.9685 - acc: 0.9575 - binary_accuracy: 0.9575 - mean_squared_error: 461.6426 - val_loss: -1.9224 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8921

Epoch 00112: val_loss did not improve from -1.92322
Epoch 113/150
10000/10000 [==============================] - 742s 74ms/step - loss: -1.9685 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6424 - val_loss: -1.9220 - val_acc: 0.9816 - val_binary_accuracy: 0.9816 - val_mean_squared_error: 149.8920

Epoch 00113: val_loss did not improve from -1.92322
Epoch 114/150
10000/10000 [==============================] - 749s 75ms/step - loss: -1.9683 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6426 - val_loss: -1.9228 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8920

Epoch 00114: val_loss did not improve from -1.92322
Epoch 115/150
10000/10000 [==============================] - 740s 74ms/step - loss: -1.9680 - acc: 0.9575 - binary_accuracy: 0.9575 - mean_squared_error: 461.6428 - val_loss: -1.9216 - val_acc: 0.9814 - val_binary_accuracy: 0.9814 - val_mean_squared_error: 149.8920

Epoch 00115: val_loss did not improve from -1.92322
Epoch 116/150
10000/10000 [==============================] - 739s 74ms/step - loss: -1.9685 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6423 - val_loss: -1.9235 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8920

Epoch 00116: val_loss improved from -1.92322 to -1.92345, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 117/150
10000/10000 [==============================] - 719s 72ms/step - loss: -1.9686 - acc: 0.9577 - binary_accuracy: 0.9577 - mean_squared_error: 461.6425 - val_loss: -1.9238 - val_acc: 0.9821 - val_binary_accuracy: 0.9821 - val_mean_squared_error: 149.8919

Epoch 00117: val_loss improved from -1.92345 to -1.92382, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 118/150
10000/10000 [==============================] - 746s 75ms/step - loss: -1.9685 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6426 - val_loss: -1.9233 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8920

Epoch 00118: val_loss did not improve from -1.92382
Epoch 119/150
10000/10000 [==============================] - 744s 74ms/step - loss: -1.9684 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6425 - val_loss: -1.9225 - val_acc: 0.9817 - val_binary_accuracy: 0.9817 - val_mean_squared_error: 149.8919

Epoch 00119: val_loss did not improve from -1.92382
Epoch 120/150
10000/10000 [==============================] - 753s 75ms/step - loss: -1.9686 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6426 - val_loss: -1.9222 - val_acc: 0.9818 - val_binary_accuracy: 0.9818 - val_mean_squared_error: 149.8920

Epoch 00120: val_loss did not improve from -1.92382
Epoch 121/150
10000/10000 [==============================] - 746s 75ms/step - loss: -1.9685 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6422 - val_loss: -1.9236 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8919

Epoch 00121: val_loss did not improve from -1.92382
Epoch 122/150
10000/10000 [==============================] - 727s 73ms/step - loss: -1.9687 - acc: 0.9576 - binary_accuracy: 0.9576 - mean_squared_error: 461.6420 - val_loss: -1.9235 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8919

Epoch 00122: val_loss did not improve from -1.92382
Epoch 123/150
10000/10000 [==============================] - 750s 75ms/step - loss: -1.9687 - acc: 0.9577 - binary_accuracy: 0.9577 - mean_squared_error: 461.6421 - val_loss: -1.9234 - val_acc: 0.9818 - val_binary_accuracy: 0.9818 - val_mean_squared_error: 149.8918

Epoch 00123: val_loss did not improve from -1.92382
Epoch 124/150
10000/10000 [==============================] - 721s 72ms/step - loss: -1.9684 - acc: 0.9577 - binary_accuracy: 0.9577 - mean_squared_error: 461.6422 - val_loss: -1.9237 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8918

Epoch 00124: val_loss did not improve from -1.92382
Epoch 125/150
10000/10000 [==============================] - 778s 78ms/step - loss: -1.9683 - acc: 0.9578 - binary_accuracy: 0.9578 - mean_squared_error: 461.6425 - val_loss: -1.9239 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8918

Epoch 00125: val_loss improved from -1.92382 to -1.92390, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 126/150
10000/10000 [==============================] - 744s 74ms/step - loss: -1.9686 - acc: 0.9577 - binary_accuracy: 0.9577 - mean_squared_error: 461.6421 - val_loss: -1.9240 - val_acc: 0.9821 - val_binary_accuracy: 0.9821 - val_mean_squared_error: 149.8918

Epoch 00126: val_loss improved from -1.92390 to -1.92399, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 127/150
10000/10000 [==============================] - 743s 74ms/step - loss: -1.9688 - acc: 0.9578 - binary_accuracy: 0.9578 - mean_squared_error: 461.6420 - val_loss: -1.9240 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8917

Epoch 00127: val_loss improved from -1.92399 to -1.92403, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 128/150
10000/10000 [==============================] - 743s 74ms/step - loss: -1.9686 - acc: 0.9578 - binary_accuracy: 0.9578 - mean_squared_error: 461.6421 - val_loss: -1.9246 - val_acc: 0.9822 - val_binary_accuracy: 0.9822 - val_mean_squared_error: 149.8917

Epoch 00128: val_loss improved from -1.92403 to -1.92463, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 129/150
10000/10000 [==============================] - 728s 73ms/step - loss: -1.9686 - acc: 0.9578 - binary_accuracy: 0.9578 - mean_squared_error: 461.6420 - val_loss: -1.9236 - val_acc: 0.9818 - val_binary_accuracy: 0.9818 - val_mean_squared_error: 149.8917

Epoch 00129: val_loss did not improve from -1.92463
Epoch 130/150
10000/10000 [==============================] - 728s 73ms/step - loss: -1.9685 - acc: 0.9577 - binary_accuracy: 0.9577 - mean_squared_error: 461.6421 - val_loss: -1.9247 - val_acc: 0.9821 - val_binary_accuracy: 0.9821 - val_mean_squared_error: 149.8917

Epoch 00130: val_loss improved from -1.92463 to -1.92469, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 131/150
10000/10000 [==============================] - 756s 76ms/step - loss: -1.9687 - acc: 0.9578 - binary_accuracy: 0.9578 - mean_squared_error: 461.6420 - val_loss: -1.9239 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8917

Epoch 00131: val_loss did not improve from -1.92469
Epoch 132/150
10000/10000 [==============================] - 714s 71ms/step - loss: -1.9685 - acc: 0.9579 - binary_accuracy: 0.9579 - mean_squared_error: 461.6421 - val_loss: -1.9245 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8917

Epoch 00132: val_loss did not improve from -1.92469
Epoch 133/150
10000/10000 [==============================] - 762s 76ms/step - loss: -1.9687 - acc: 0.9579 - binary_accuracy: 0.9579 - mean_squared_error: 461.6420 - val_loss: -1.9249 - val_acc: 0.9821 - val_binary_accuracy: 0.9821 - val_mean_squared_error: 149.8916

Epoch 00133: val_loss improved from -1.92469 to -1.92491, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 134/150
10000/10000 [==============================] - 745s 75ms/step - loss: -1.9687 - acc: 0.9578 - binary_accuracy: 0.9578 - mean_squared_error: 461.6418 - val_loss: -1.9245 - val_acc: 0.9820 - val_binary_accuracy: 0.9820 - val_mean_squared_error: 149.8916

Epoch 00134: val_loss did not improve from -1.92491
Epoch 135/150
10000/10000 [==============================] - 753s 75ms/step - loss: -1.9688 - acc: 0.9580 - binary_accuracy: 0.9580 - mean_squared_error: 461.6417 - val_loss: -1.9244 - val_acc: 0.9821 - val_binary_accuracy: 0.9821 - val_mean_squared_error: 149.8916

Epoch 00135: val_loss did not improve from -1.92491
Epoch 136/150
10000/10000 [==============================] - 759s 76ms/step - loss: -1.9691 - acc: 0.9579 - binary_accuracy: 0.9579 - mean_squared_error: 461.6417 - val_loss: -1.9254 - val_acc: 0.9822 - val_binary_accuracy: 0.9822 - val_mean_squared_error: 149.8915

Epoch 00136: val_loss improved from -1.92491 to -1.92537, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 137/150
10000/10000 [==============================] - 736s 74ms/step - loss: -1.9692 - acc: 0.9581 - binary_accuracy: 0.9581 - mean_squared_error: 461.6416 - val_loss: -1.9265 - val_acc: 0.9825 - val_binary_accuracy: 0.9825 - val_mean_squared_error: 149.8917

Epoch 00137: val_loss improved from -1.92537 to -1.92648, saving model to model-tgs-salt-dice-15-10-2018.h5
Epoch 138/150
10000/10000 [==============================] - 723s 72ms/step - loss: -1.9687 - acc: 0.9581 - binary_accuracy: 0.9581 - mean_squared_error: 461.6418 - val_loss: -1.9250 - val_acc: 0.9822 - val_binary_accuracy: 0.9822 - val_mean_squared_error: 149.8915

Epoch 00138: val_loss did not improve from -1.92648
Epoch 139/150
10000/10000 [==============================] - 749s 75ms/step - loss: -1.9688 - acc: 0.9579 - binary_accuracy: 0.9579 - mean_squared_error: 461.6420 - val_loss: -1.9245 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8915

Epoch 00139: val_loss did not improve from -1.92648
Epoch 140/150
10000/10000 [==============================] - 742s 74ms/step - loss: -1.9690 - acc: 0.9581 - binary_accuracy: 0.9581 - mean_squared_error: 461.6416 - val_loss: -1.9240 - val_acc: 0.9819 - val_binary_accuracy: 0.9819 - val_mean_squared_error: 149.8916

Epoch 00140: val_loss did not improve from -1.92648
Epoch 141/150
10000/10000 [==============================] - 745s 75ms/step - loss: -1.9690 - acc: 0.9580 - binary_accuracy: 0.9580 - mean_squared_error: 461.6415 - val_loss: -1.9255 - val_acc: 0.9823 - val_binary_accuracy: 0.9823 - val_mean_squared_error: 149.8916

Epoch 00141: val_loss did not improve from -1.92648
Epoch 142/150
10000/10000 [==============================] - 727s 73ms/step - loss: -1.9691 - acc: 0.9582 - binary_accuracy: 0.9582 - mean_squared_error: 461.6415 - val_loss: -1.9256 - val_acc: 0.9822 - val_binary_accuracy: 0.9822 - val_mean_squared_error: 149.8915

Epoch 00142: val_loss did not improve from -1.92648
Epoch 143/150
10000/10000 [==============================] - 749s 75ms/step - loss: -1.9689 - acc: 0.9581 - binary_accuracy: 0.9581 - mean_squared_error: 461.6416 - val_loss: -1.9255 - val_acc: 0.9822 - val_binary_accuracy: 0.9822 - val_mean_squared_error: 149.8915

Epoch 00143: val_loss did not improve from -1.92648
Epoch 144/150
10000/10000 [==============================] - 726s 73ms/step - loss: -1.9689 - acc: 0.9580 - binary_accuracy: 0.9580 - mean_squared_error: 461.6416 - val_loss: -1.9264 - val_acc: 0.9824 - val_binary_accuracy: 0.9824 - val_mean_squared_error: 149.8914

Epoch 00144: val_loss did not improve from -1.92648
Epoch 145/150
10000/10000 [==============================] - 761s 76ms/step - loss: -1.9690 - acc: 0.9582 - binary_accuracy: 0.9582 - mean_squared_error: 461.6418 - val_loss: -1.9258 - val_acc: 0.9821 - val_binary_accuracy: 0.9821 - val_mean_squared_error: 149.8914

Epoch 00145: val_loss did not improve from -1.92648
Epoch 146/150
10000/10000 [==============================] - 737s 74ms/step - loss: -1.9690 - acc: 0.9579 - binary_accuracy: 0.9579 - mean_squared_error: 461.6417 - val_loss: -1.9261 - val_acc: 0.9823 - val_binary_accuracy: 0.9823 - val_mean_squared_error: 149.8914

Epoch 00146: val_loss did not improve from -1.92648
Epoch 147/150
10000/10000 [==============================] - 761s 76ms/step - loss: -1.9690 - acc: 0.9583 - binary_accuracy: 0.9583 - mean_squared_error: 461.6412 - val_loss: -1.9262 - val_acc: 0.9824 - val_binary_accuracy: 0.9824 - val_mean_squared_error: 149.8914

Epoch 00147: val_loss did not improve from -1.92648
Epoch 00147: early stopping

Evaluate

In [39]:
# EVALUATE
# Load Weights
model.load_weights('model-tgs-salt-dice-15-10-2018.h5')

# Evaluate the Model on Random Images
for i in range(100,150):
    # Predict
    original_image = x_valid[i]
    label_image = y_valid[i]
    start_time = time.time()
    prediction_image = model.predict(np.expand_dims(original_image, 0))[0]
    end_time = time.time()
    print('Computation Time:', end_time - start_time)
    
    # Plot
    fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize = (15,15))
    original_image = resize(original_image, (200, 560), anti_aliasing=True)
    label_image = resize(label_image, (200, 560), anti_aliasing=True)
    prediction_image = resize(prediction_image, (200, 560), anti_aliasing=True)
    axis1.imshow(original_image[:,:,0], cmap = 'jet')
    axis1.set_title('Input Image')
    axis2.imshow(label_image[:,:,0], cmap = 'jet')
    axis2.set_title('Label')
    axis3.imshow(prediction_image[:,:,0], cmap = 'jet')
    axis3.set_title('Prediction')
Computation Time: 0.08404684066772461
Computation Time: 0.015869617462158203
Computation Time: 0.015781879425048828
/home/avidbots/.local/lib/python3.5/site-packages/skimage/transform/_warps.py:105: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15.
  warn("The default mode, 'constant', will be changed to 'reflect' in "
/home/avidbots/.local/lib/python3.5/site-packages/skimage/util/dtype.py:130: UserWarning: Possible precision loss when converting from int64 to float64
  .format(dtypeobj_in, dtypeobj_out))
Computation Time: 0.015794038772583008
Computation Time: 0.016267061233520508
Computation Time: 0.0159761905670166
Computation Time: 0.01578068733215332
Computation Time: 0.015628337860107422
Computation Time: 0.015999555587768555
Computation Time: 0.01531219482421875
Computation Time: 0.015579938888549805
Computation Time: 0.017029762268066406
Computation Time: 0.01714491844177246
Computation Time: 0.01556849479675293
Computation Time: 0.015555143356323242
Computation Time: 0.01562190055847168
Computation Time: 0.016442537307739258
Computation Time: 0.02344346046447754
Computation Time: 0.05768442153930664
Computation Time: 0.058444976806640625
Computation Time: 0.02685999870300293
Computation Time: 0.015408992767333984
Computation Time: 0.020606040954589844
/home/avidbots/.local/lib/python3.5/site-packages/matplotlib/pyplot.py:537: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Computation Time: 0.016616344451904297
Computation Time: 0.015924692153930664
Computation Time: 0.018291473388671875
Computation Time: 0.015749454498291016
Computation Time: 0.018761873245239258
Computation Time: 0.015436649322509766
Computation Time: 0.016410350799560547
Computation Time: 0.015928030014038086
Computation Time: 0.015796422958374023
Computation Time: 0.015645265579223633
Computation Time: 0.017270326614379883
Computation Time: 0.01583242416381836
Computation Time: 0.0158846378326416
Computation Time: 0.01585841178894043
Computation Time: 0.017305612564086914
Computation Time: 0.016403675079345703
Computation Time: 0.01614832878112793
Computation Time: 0.01792311668395996
Computation Time: 0.018211841583251953
Computation Time: 0.015735149383544922
Computation Time: 0.015439987182617188
Computation Time: 0.015512228012084961
Computation Time: 0.017705440521240234
Computation Time: 0.01566171646118164
Computation Time: 0.01690220832824707
Computation Time: 0.015688657760620117
Computation Time: 0.014836311340332031